##Load packages

library(tidyverse)
library(raster)
library(rstan)
library(RColorBrewer)
library(rnaturalearth)
library(rnaturalearthdata)
library(sf)
library(ggplot2)
library(ggpubr)
library(gtools)
library(bayesplot)

# Count the number of distinct values in a vector.
lunique <- function(x) {
  length(unique(x))
}

# Integer-code a vector via factor conversion (level order -> 1, 2, ...).
factnum <- function(x) {
  as.numeric(as.factor(x))
}

##Load data and manipulate

# Survey responses in long format (multiple rows per respondent).
# The leading column (a row index saved with the CSV) is dropped.
qlong = read.csv("../Data/TrophyHuntingSurveyData.csv")[,-1]

#Number of respondents
length(unique(qlong$Response.ID))
## [1] 285
#Number of countries (free-text field: spellings and case vary, NA = no answer)
unique(qlong$What.country.have.you.spent.the.most.time.living.in.)
##  [1] NA                          "sweden"                   
##  [3] "England"                   "Denmark"                  
##  [5] "UK"                        "UK - England"             
##  [7] "United Kingdom"            "Uk"                       
##  [9] "Australia "                "France"                   
## [11] "England "                  "Netherlands "             
## [13] "USA"                       "United States"            
## [15] "UK "                       "United kingdom "          
## [17] "Scotland "                 "Canada and Taiwan"        
## [19] "India"                     "Denmark "                 
## [21] "US"                        "Canada"                   
## [23] "Australia"                 "United Kingdom "          
## [25] "Rhodesia/South Africa"     "The Netherlands "         
## [27] "Italy"                     "Brazi."                   
## [29] "Germany"                   "South Africa"             
## [31] "Lithuania"                 "uk"                       
## [33] "Indonesia"                 "United Kingdom (England)" 
## [35] "Poland"                    "Netherlands"              
## [37] "Sweden"                    "Beijing "                 
## [39] "Great Britain "            "Engalnd "                 
## [41] "Spain"                     "Finland"                  
## [43] "Ireland"                   "Scotland"                 
## [45] "Ik"                        "Germany "                 
## [47] "United States of America"  "Tanzania"                 
## [49] "Belgium "                  "The Netherlands"          
## [51] "UK/Iran"                   "GB"                       
## [53] "Brazil"                    "Kenya"                    
## [55] "Czechia"                   "South Africa "            
## [57] "germany"                   "Wales"                    
## [59] "China"                     "Portugal"                 
## [61] "the Netherlands"           "Czech Republic"           
## [63] "United states"             "Hong Kong"                
## [65] "Israel"                    "United States of America "
## [67] "Belgium"                   "United States "           
## [69] "USA "                      "Switzerland"              
## [71] "Taiwan"                    "UNITED KINGDOM"
print("Countries include: Sweden, UK, Denmark, Australia, France, Netherlands, USA, Taiwan, Canada, India, South Africa, Rhodesia, Italy, Brazil, Germany, Lithuania, Indonesia, Poland, China, Finland, Ireland, Tanzania, Belgium, Iran, Kenya, Czechia, Portugal, Israel, Switzerland")
## [1] "Countries include: Sweden, UK, Denmark, Australia, France, Netherlands, USA, Taiwan, Canada, India, South Africa, Rhodesia, Italy, Brazil, Germany, Lithuania, Indonesia, Poland, China, Finland, Ireland, Tanzania, Belgium, Iran, Kenya, Czechia, Portugal, Israel, Switzerland"
print("29 countries")
## [1] "29 countries"
# split by never or sometimes acceptable - 0 is sometimes acceptable.
# Numeric respondent id used for grouping and joins below.
qlong=qlong %>% mutate(responseid = as.numeric(as.factor(Response.ID)))

# Per-respondent range and mean of the acceptability scores.
# (The mutate() here repeats the responseid step just above - redundant
# but harmless.)
scoretab = qlong %>% mutate(responseid = as.numeric(as.factor(Response.ID))) %>%
  group_by(responseid)%>%
  summarise(range=max(accepscore) - min(accepscore),score=mean(accepscore)) 

#histogram of mean scores across respondents
scoretab %>% ggplot(aes(score)) +
  geom_histogram() +
  theme_classic()

# 1/3 write 0 for everything - a handful of fixed scores 
filter(scoretab,range==0) %>% ggplot(aes(score)) +
  geom_histogram() +
  theme_classic()

# fixed at zero are rejectors: clus = 1 when a respondent scored 0 on
# every item (range 0 and mean 0), else 0
qlong=left_join(qlong,scoretab,by="responseid") %>% 
  mutate(clus = ifelse(range==0 & score==0,1,0))

# change some qlong names which are unwieldy
# NOTE(review): positional renaming - assumes columns 20 and 31-37 hold
# exactly these fields; this breaks silently if the CSV layout changes.
# Verify against the raw survey export.
names(qlong)[c(20,31,32,33,34,35,36,37)] = c("duration","age","gender","education.level","country.mosttime","diet","would.hunt","expertise")

# get distinct rows for people (one row per respondent)
dqlong=qlong %>% distinct(responseid,.keep_all = T) 

# keep only the modelling covariates, then drop respondents with any
# missing value
dqlong = dqlong[,c("age","gender","education.level","diet","expertise","clus","duration")] 
dqlong = dqlong[complete.cases(dqlong),]

##Visualise sample

cols =brewer.pal(8, "Set2")
# Coastlines and country polygons for the base map
world = ne_coastline(scale = "medium", returnclass = "sf")
world_countries = ne_countries(scale = "medium", returnclass = "sf")
# Fixing polygons crossing dateline
world = st_wrap_dateline(world)
world_countries = st_wrap_dateline(world_countries)
# World bounding box, densified to a 100-cell grid so its edges curve
# correctly when reprojected to Eckert IV below
xmin = st_bbox(world)[["xmin"]]; xmax <- st_bbox(world)[["xmax"]]
ymin = st_bbox(world)[["ymin"]]; ymax <- st_bbox(world)[["ymax"]]
bb = sf::st_union(sf::st_make_grid(st_bbox(c(xmin = xmin,
                                             xmax = xmax,
                                             ymax = ymax,
                                             ymin = ymin),
                                           crs = st_crs(4326)),
                                   n = 100))
# Dashed equator line for the map
equator = st_linestring(matrix(c(-180, 0, 180, 0), ncol = 2, byrow = TRUE))
equator = st_sfc(equator, crs = st_crs(world))
# Equal-area Eckert IV projection used for the map panel
eckertIV =
  "+proj=eck4 +lon_0=0 +x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m +no_defs"

# Respondent locations as sf points.
# NOTE(review): crs 4269 is NAD83 while the rest of the script uses 4326
# (WGS84) - visually indistinguishable at this map scale, but confirm the
# intended datum.
qlonglat = filter(qlong,!is.na(Location.Longitude))
qlonglat = sf::st_as_sf(qlonglat, 
                        coords = c("Location.Longitude", "Location.Latitude"),
                        crs = 4269) 
text_sizea = 9

# Map of respondent locations, Eckert IV projection.
# Layer order matters: ocean backdrop, equator, countries, coastlines,
# then semi-transparent points (alpha shows point density).
Fig1a = ggplot(world) +
  geom_sf(data = bb, fill = "grey80") +
  geom_sf(data = equator, color = "gray20", linetype = "dashed",
          linewidth = 0.1)+
  geom_sf(data = world_countries, fill = "grey95", color = NA) +
  geom_sf(color = "gray50", linewidth = 0.1)+
  labs(title = "") +
  geom_sf(data = qlonglat,
          aes(geometry = geometry),
          size = 0.7,
          color = "black",
          alpha=0.1)+
  coord_sf(crs = eckertIV) +
  theme_void()

# Scores: histogram of mean acceptability per respondent (density scale).
# `after_stat(density)` replaces `..density..`, which was deprecated in
# ggplot2 3.4.0 - the same release that introduced `linewidth`, already
# used elsewhere in this script, so the newer syntax is safe here.
Fig1b = scoretab %>% ggplot(aes(score))+
  geom_histogram(aes(y=after_stat(density)),binwidth = 1, position = "identity",alpha=0.5,color="black",fill=cols[1])+
  theme_classic()+
  theme(axis.text.x = element_text(size = text_sizea), axis.title.x = element_text(size = text_sizea),
        axis.text.y = element_text(size = text_sizea), axis.title.y = element_text(size = text_sizea))+
  scale_x_continuous(expand = c(0,0)) +
  scale_y_continuous(expand = c(0,0)) +
  xlab("Acceptability")+
  ylab("Density")


# Age
# basic stats
mean(dqlong$age)
## [1] 42.30986
sd(dqlong$age)
## [1] 14.06632
range(dqlong$age)
## [1] 18 88
# Age histogram on the density scale. `after_stat(density)` replaces
# `..density..`, deprecated in ggplot2 3.4.0 (this script already relies
# on `linewidth`, also a 3.4.0 feature).
Fig1c = dqlong %>% ggplot(aes(age))+
  geom_histogram(aes(y=after_stat(density)),binwidth = 1, position = "identity",alpha=0.5,color="black",fill=cols[1])+
  theme_classic()+
  theme(axis.text.x = element_text(size = text_sizea), axis.title.x = element_text(size = text_sizea),
        axis.text.y = element_text(size = text_sizea), axis.title.y = element_text(size = text_sizea))+
  scale_x_continuous(expand = c(0,0)) +
  scale_y_continuous(expand = c(0,0)) +
  xlab("Age")+
  ylab("")

# Gender: bar chart of respondent counts, with the long non-binary label
# shortened for display. Scales, labels and theme are order-independent
# ggplot components; theme_classic() precedes the theme() overrides.
Fig1d = dqlong %>%
  mutate(gender = recode(gender, "Non-binary / third gender" = "Non-binary")) %>%
  ggplot(aes(gender)) +
  geom_bar() +
  labs(x = "", y = "Count", title = "Gender") +
  scale_x_discrete(guide = guide_axis(n.dodge=2)) +
  scale_y_continuous(expand = c(0,0)) +
  theme_classic() +
  theme(axis.text.x = element_text(size = text_sizea),
        axis.title.x = element_text(size = text_sizea),
        axis.text.y = element_text(size = text_sizea),
        axis.title.y = element_text(size = text_sizea),
        plot.title = element_text(size = text_sizea))


# Education
levels(as.factor(dqlong$education.level))
## [1] "Bachelors degree"             "Doctoral degree"             
## [3] "High school/College or below" "Masters degree"              
## [5] "Prefer not to say"
# reorder levels from "Prefer not to say" up through Doctoral
dqlong$education.level = factor(dqlong$education.level, levels=c("Prefer not to say","High school/College or below","Bachelors degree","Masters degree","Doctoral degree"))

# Bar chart of education with shortened level labels
# ("Prefer not to say" is left unrecoded)
Fig1e = dqlong %>% 
  mutate(education.level = recode_factor(education.level, "High school/College or below" = "High school or below", "Bachelors degree" = "Bachelors", "Masters degree" = "Masters", "Doctoral degree" = "Doctoral", .ordered = T)) %>%
  ggplot(aes(education.level)) +
  geom_bar() +
  theme_classic()+  
  theme(axis.text.x = element_text(size = text_sizea), axis.title.x = element_text(size = text_sizea),
        axis.text.y = element_text(size = text_sizea), axis.title.y = element_text(size = text_sizea),
        plot.title = element_text(size = text_sizea))+
  scale_x_discrete(guide = guide_axis(n.dodge=2)) +
  scale_y_continuous(expand = c(0,0)) +
  labs(x = "", y = "", title = "Education")

# Diet: order levels from most to least restrictive (vegan -> carnivore)
dqlong$diet = factor(dqlong$diet, levels=c("Vegan (No meat, fish, or dairy)","Vegetarian (No meat or fish)","Pescatarian (No meat)","Omnivore (Meat, fish, dairy, fruit and vegetables)","Carnivore (Primarily meat)"))

# Bar chart of diet with shortened level labels
Fig1f = dqlong %>% 
  mutate(diet = recode_factor(diet, "Vegan (No meat, fish, or dairy)" = "Vegan", "Vegetarian (No meat or fish)" = "Vegetarian", "Pescatarian (No meat)" = "Pescatarian", "Omnivore (Meat, fish, dairy, fruit and vegetables)" = "Omnivore", "Carnivore (Primarily meat)" = "Carnivore", .ordered = T)) %>%
  ggplot(aes(diet)) +
  geom_bar() +
  theme_classic()+  
  theme(axis.text.x = element_text(size = text_sizea), axis.title.x = element_text(size = text_sizea),
        axis.text.y = element_text(size = text_sizea), axis.title.y = element_text(size = text_sizea),
        plot.title = element_text(size = text_sizea))+
  scale_x_discrete(guide = guide_axis(n.dodge=2)) +
  scale_y_continuous(expand = c(0,0)) +
  labs(x = "", y = "Count", title = "Diet")

# Expertise: bar chart of respondent counts (categories as recorded in
# the survey). Component order below differs from the sibling panels but
# scales/labels/theme are order-independent in ggplot.
Fig1g = dqlong %>%
  ggplot(aes(expertise)) +
  geom_bar() +
  labs(x = "", y = "", title = "Expertise") +
  scale_x_discrete() +
  scale_y_continuous(expand = c(0,0)) +
  theme_classic() +
  theme(axis.text.x = element_text(size = text_sizea),
        axis.title.x = element_text(size = text_sizea),
        axis.text.y = element_text(size = text_sizea),
        axis.title.y = element_text(size = text_sizea),
        plot.title = element_text(size = text_sizea))

# arrange panels: map on top (A), six summary panels below (B-G)
Fig1=ggarrange(Fig1a,
          ggarrange(Fig1b, Fig1c, Fig1d, Fig1e, Fig1f, Fig1g, ncol = 2, nrow = 3, labels = c("B", "C", "D", "E", "F", "G"), font.label = list(size = 10)), nrow = 2, heights = c(1, 1.2), labels = c("A", ""), font.label = list(size = 10))
Fig1

ggsave("../Figures/Fig1.png",Fig1,dpi=300,width=10,height=10)

##Model 1: Rejectors vs Discriminators

#7 people do not identify as male or female
# (the Freq column counts rows of the long data, several per person)
knitr::kable(as.data.frame(table(qlong$gender)), format="markdown")
## Var1 Freq
## Female 630
## Male 760
## Non-binary / third gender 15
## Prefer not to say 20
#4 people do not wish to list their education level
# (again, Freq counts rows of the long data, not respondents)
knitr::kable(as.data.frame(table(qlong$education.level)), format="markdown")
## Var1 Freq
## Bachelors degree 405
## Doctoral degree 390
## High school/College or below 220
## Masters degree 390
## Prefer not to say 20
# Drop respondents outside Male/Female and those withholding education -
# these groups are too small to estimate effects for
dqlong_clipped=filter(dqlong,gender %in% c("Male","Female")) %>% filter(!education.level == "Prefer not to say" )

#number of respondents in the final sample
nrow(dqlong_clipped)
## [1] 273
#numbers of people that answered 0 every time (1) or not (0)
table(dqlong_clipped$clus)
## 
##   0   1 
## 178  95
# prior on simplex: visualise 100 draws from a Dirichlet(2,...,2)
draws = rdirichlet(100,rep(2,5)) #  alpha = 2 as suggested by McElreath looks reasonable for the simplex
plot(NULL,xlim=c(1,5),ylim=c(0,0.7))
for(i in 1:100) lines(1:5,draws[i,])

# Data list for Stan. Using an intercept with a baseline of
# female, high school, vegan, no expertise.
datalistop = list(
  y = dqlong_clipped$clus, # 1 = rejector (all-zero scores), 0 = discriminator
  
  age = dqlong_clipped$age, # keep continuous
  gender = factnum(dqlong_clipped$gender)-1, # 0 female, 1 male
  
  education = factnum(droplevels(dqlong_clipped$education.level)), # 4 levels after dropping "Prefer not to say" (1 high school - 4 doctorate)
  diet = factnum(dqlong_clipped$diet), # (1 vegan  - 5 carnivore)
  expertise = factnum(dqlong_clipped$expertise), # (1 no -  3 year)
  
  # order pred alphas (these get sent in as vectors) one less than the levels as first level is absorbed into intercept
  alphaeducation = rep(2,lunique(dqlong_clipped$education.level)-1),
  alphadiet = rep(2,lunique(dqlong_clipped$diet)-1),
  alphaexpertise = rep(2,lunique(dqlong_clipped$expertise)-1),
  
  # sample size
  n = nrow(dqlong_clipped)
)

# Stan model: Bernoulli-logit regression of rejector status, with
# monotonic ("ordered predictor") effects for education, diet and
# expertise via the cumulative-simplex parameterisation (each beta scales
# the cumulative sum of a Dirichlet-distributed simplex; the first level
# is absorbed into the intercept).
# NOTE(review): the string uses the pre-2.26 array syntax (`int y[n]`),
# which newer Stan (2.33+) rejects in favour of `array[n] int y;` -
# fine for the rstan version this was knitted with, but will need
# updating on newer toolchains. The string itself is left untouched.
orderpredmod = "
data{
  int<lower=0> n;
  int<lower=0,upper=1> y[n];
  
  // continuous
  vector[n] age;
  
  // categorial
  vector[n] gender;
  
  // ordered predictors
  int education[n];
  int diet[n];
  int expertise[n];
  
  vector[3] alphaeducation;
  vector[4] alphadiet;
  vector[2] alphaexpertise;
}

parameters{
  real intercept;
  real betaage;
  real betagender;
  real betaeducation;
  real betadiet;
  real betaexpertise;
  
  //
  simplex[3] deltaeducation;
  simplex[4] deltadiet;
  simplex[2] deltaexpertise;
}

transformed parameters {
 vector[n] logitmu;
 vector[4] deltaeducation_j;
 vector[5] deltadiet_j;
 vector[3] deltaexpertise_j;
 
 deltaeducation_j = append_row(0,deltaeducation);
 deltadiet_j = append_row(0,deltadiet);
 deltaexpertise_j = append_row(0,deltaexpertise);

 for(i in 1:n) {
  logitmu[i] = intercept + betaage * age[i] +  betagender * gender[i] + betaeducation * sum(deltaeducation_j[1:education[i]]) +  betadiet * sum(deltadiet_j[1:diet[i]]) + betaexpertise * sum(deltaexpertise_j[1:expertise[i]]) ;
 }

}

model {
 intercept ~ normal(0,1.6);
 betaage ~ normal(0,1.6);
 betagender ~ normal(0,1.6);
 betaeducation ~ normal(0,1.6);
 betadiet ~ normal(0,1.6);
 betaexpertise ~ normal(0,1.6);
 
 deltaeducation ~ dirichlet(alphaeducation);
 deltadiet ~ dirichlet(alphadiet);
 deltaexpertise ~ dirichlet(alphaexpertise);
 
 for(i in 1:n) y[i] ~  bernoulli_logit(logitmu[i]);

}

generated quantities{
  vector[n] log_lik;
  vector[n] scores;

  for(j in 1:n) {
      log_lik[j] = bernoulli_logit_lpmf(y[j]| logitmu[j]);
      scores[j] = bernoulli_logit_rng(logitmu[j]);
  }

}


"

# Fit: 4 chains x 1000 post-warmup iterations; adapt_delta raised to 0.95
# to reduce divergent transitions; fixed seed for reproducibility.
# NOTE(review): the captured knit output just below shows the C++
# toolchain *failing* to compile (Eigen/<complex> header errors) on the
# machine this was last knitted on - the downstream results must have
# come from an earlier successful run. Verify the toolchain before
# re-knitting.
ordermod=rstan::stan(model_code =orderpredmod,
                     data=datalistop,
                     iter = 2000,
                     warmup = 1000,
                     chains=4,
                     cores=4,
                     seed = 1234,
                     control = list(adapt_delta = 0.95))
## Running /Library/Frameworks/R.framework/Resources/bin/R CMD SHLIB foo.c
## clang -arch arm64 -I"/Library/Frameworks/R.framework/Resources/include" -DNDEBUG   -I"/Library/Frameworks/R.framework/Versions/4.2-arm64/Resources/library/Rcpp/include/"  -I"/Library/Frameworks/R.framework/Versions/4.2-arm64/Resources/library/RcppEigen/include/"  -I"/Library/Frameworks/R.framework/Versions/4.2-arm64/Resources/library/RcppEigen/include/unsupported"  -I"/Library/Frameworks/R.framework/Versions/4.2-arm64/Resources/library/BH/include" -I"/Library/Frameworks/R.framework/Versions/4.2-arm64/Resources/library/StanHeaders/include/src/"  -I"/Library/Frameworks/R.framework/Versions/4.2-arm64/Resources/library/StanHeaders/include/"  -I"/Library/Frameworks/R.framework/Versions/4.2-arm64/Resources/library/RcppParallel/include/"  -I"/Library/Frameworks/R.framework/Versions/4.2-arm64/Resources/library/rstan/include" -DEIGEN_NO_DEBUG  -DBOOST_DISABLE_ASSERTS  -DBOOST_PENDING_INTEGER_LOG2_HPP  -DSTAN_THREADS  -DUSE_STANC3 -DSTRICT_R_HEADERS  -DBOOST_PHOENIX_NO_VARIADIC_EXPRESSION  -D_HAS_AUTO_PTR_ETC=0  -include '/Library/Frameworks/R.framework/Versions/4.2-arm64/Resources/library/StanHeaders/include/stan/math/prim/fun/Eigen.hpp'  -D_REENTRANT -DRCPP_PARALLEL_USE_TBB=1   -I/opt/R/arm64/include   -fPIC  -falign-functions=64 -Wall -g -O2  -c foo.c -o foo.o
## In file included from <built-in>:1:
## In file included from /Library/Frameworks/R.framework/Versions/4.2-arm64/Resources/library/StanHeaders/include/stan/math/prim/fun/Eigen.hpp:22:
## In file included from /Library/Frameworks/R.framework/Versions/4.2-arm64/Resources/library/RcppEigen/include/Eigen/Dense:1:
## In file included from /Library/Frameworks/R.framework/Versions/4.2-arm64/Resources/library/RcppEigen/include/Eigen/Core:88:
## /Library/Frameworks/R.framework/Versions/4.2-arm64/Resources/library/RcppEigen/include/Eigen/src/Core/util/Macros.h:628:1: error: unknown type name 'namespace'
## namespace Eigen {
## ^
## /Library/Frameworks/R.framework/Versions/4.2-arm64/Resources/library/RcppEigen/include/Eigen/src/Core/util/Macros.h:628:16: error: expected ';' after top level declarator
## namespace Eigen {
##                ^
##                ;
## In file included from <built-in>:1:
## In file included from /Library/Frameworks/R.framework/Versions/4.2-arm64/Resources/library/StanHeaders/include/stan/math/prim/fun/Eigen.hpp:22:
## In file included from /Library/Frameworks/R.framework/Versions/4.2-arm64/Resources/library/RcppEigen/include/Eigen/Dense:1:
## /Library/Frameworks/R.framework/Versions/4.2-arm64/Resources/library/RcppEigen/include/Eigen/Core:96:10: fatal error: 'complex' file not found
## #include <complex>
##          ^~~~~~~~~
## 3 errors generated.
## make: *** [foo.o] Error 1
# Chain mixing diagnostics
traceplot(ordermod)

##Visualise Model 1

# extract posterior samples as a named list of draws
ordermodsamples = extract(ordermod)

# posterior predictive check: replicated statistic vs observed data
(pcheck1=ppc_stat(datalistop$y,ordermodsamples$scores))

ggsave("../Figures/Diagnostics/pcheckmod1.png",pcheck1,dpi=300,width=10,height=5)

# basic predictive stats
# Majority vote across posterior draws: returns 1 when the proportion of
# 1s in x is at least `cut` (default 0.5, ties count as 1), else 0.
# The comparison is scalar, so plain arithmetic replaces the original
# ifelse() call (ifelse on a scalar is an R anti-pattern).
maj = function(x,cut=0.5) as.numeric(mean(x) >= cut)
# Confusion matrix: majority-vote posterior prediction vs observed class
confmat=table(Pred=apply(ordermodsamples$scores,2,maj),Ob=datalistop$y)
sum(diag(confmat))/sum(confmat) # accuracy
## [1] 0.7655678
# Explicit [row, col] indexing replaces the original linear indices
# confmat[4] / confmat[1] (identical values for a 2x2 table, but clearer;
# assumes both classes appear among the predictions).
confmat[2,2] / sum(confmat[2,]) # precision: TP / predicted positive
## [1] 0.7460317
confmat[1,1]/ sum(confmat[,1]) # specificity: TN / observed negative
## [1] 0.9101124
# get table and save it.
# FIX: the original call was write.table("../Tables/Model1Output.txt"),
# which treats the path string as the data to write (the old knit output
# confirmed this: it printed the path) and never saved the summary.
sumtabordermod = summary(ordermod)$summary
write.table(sumtabordermod, file = "../Tables/Model1Output.txt")
# flipped for plot: keep the five beta rows with wide probability summaries
sumtabordermod = as.data.frame(summary(ordermod, probs = c(0.025, 0.1, 0.25, 0.50, 0.75, 0.9,0.975))$summary)[c(2:6),]
sumtabordermod$names = c("Age", "Gender", "Education", "Diet", "Expertise")
sumtabordermod$col = c("Sig", "Sig", "Sig", "Sig", "NSig")

col_pal = "#218f91"

# Forest plots
# Fig2a: the age effect alone (much smaller magnitude than the other
# betas, hence its own x-scale). Nested linewidth bands are the 95/80/50%
# posterior intervals; the point is the posterior median. Text positions
# are hard-coded to fit the +/-0.05 x-range.
Fig2a = ggplot(data = sumtabordermod[which(sumtabordermod$names == "Age"),]) +
  geom_linerange(aes(xmin = `2.5%`, xmax = `97.5%`, y = names), linewidth = 3, alpha = 0.4, colour = col_pal) +
  geom_linerange(aes(xmin = `10%`, xmax = `90%`, y = names), linewidth = 3, alpha = 0.6, colour = col_pal) +
  geom_linerange(aes(xmin = `25%`, xmax = `75%`, y = names), linewidth = 3, alpha = 0.8, colour = col_pal) +
  geom_point(aes(x = `50%`, y = names), size = 4, colour = "black") +
  geom_vline(aes(xintercept = 0), linetype = "dashed") +
  geom_text(aes(x = c(-0.03), y = c(1.3), label = c("Rejection unlikely"))) +
  geom_text(aes(x = c(0.03), y = c(1.3), label = c("Rejection likely"))) +
  coord_cartesian(xlim = c(-0.05, 0.05)) +
  theme_classic() +
  labs(x = "", y = "")

# Fig2b: the remaining betas, rows ordered by posterior median; colour
# distinguishes the "Sig"/"NSig" flag set above
Fig2b = ggplot(data = sumtabordermod[which(sumtabordermod$names != "Age"),]) +
  geom_linerange(aes(xmin = `2.5%`, xmax = `97.5%`, y = reorder(names, -`50%`), colour = col), linewidth = 3, alpha = 0.4) +
  geom_linerange(aes(xmin = `10%`, xmax = `90%`, y = reorder(names, -`50%`), colour = col), linewidth = 3, alpha = 0.6) +
  geom_linerange(aes(xmin = `25%`, xmax = `75%`, y = reorder(names, -`50%`), colour = col), linewidth = 3, alpha = 0.8) +
  geom_point(aes(x = `50%`, y = names), size = 4, colour = "black") +
  geom_vline(aes(xintercept = 0), linetype = "dashed") +
  coord_cartesian(xlim = c(-3, 3)) +
  scale_colour_manual(values = c("grey20", col_pal), guide = "none") +
  theme_classic() +
  labs(x = expression(beta), y = "")

# sort for plot on response scale: bind all posterior draws into one wide
# data frame (named beta columns first; the unnamed V* columns that
# follow come from the simplex/transformed/generated quantities)
ordermodsampleswide=data.frame(do.call(cbind,ordermodsamples))
names(ordermodsampleswide)
##   [1] "intercept"     "betaage"       "betagender"    "betaeducation"
##   [5] "betadiet"      "betaexpertise" "V7"            "V8"           
##   [9] "V9"            "V10"           "V11"           "V12"          
##  [13] "V13"           "V14"           "V15"           "V16"          
##  [17] "V17"           "V18"           "V19"           "V20"          
##  [21] "V21"           "V22"           "V23"           "V24"          
##  [25] "V25"           "V26"           "V27"           "V28"          
##  [29] "V29"           "V30"           "V31"           "V32"          
##  [33] "V33"           "V34"           "V35"           "V36"          
##  [37] "V37"           "V38"           "V39"           "V40"          
##  [41] "V41"           "V42"           "V43"           "V44"          
##  [45] "V45"           "V46"           "V47"           "V48"          
##  [49] "V49"           "V50"           "V51"           "V52"          
##  [53] "V53"           "V54"           "V55"           "V56"          
##  [57] "V57"           "V58"           "V59"           "V60"          
##  [61] "V61"           "V62"           "V63"           "V64"          
##  [65] "V65"           "V66"           "V67"           "V68"          
##  [69] "V69"           "V70"           "V71"           "V72"          
##  [73] "V73"           "V74"           "V75"           "V76"          
##  [77] "V77"           "V78"           "V79"           "V80"          
##  [81] "V81"           "V82"           "V83"           "V84"          
##  [85] "V85"           "V86"           "V87"           "V88"          
##  [89] "V89"           "V90"           "V91"           "V92"          
##  [93] "V93"           "V94"           "V95"           "V96"          
##  [97] "V97"           "V98"           "V99"           "V100"         
## [101] "V101"          "V102"          "V103"          "V104"         
## [105] "V105"          "V106"          "V107"          "V108"         
## [109] "V109"          "V110"          "V111"          "V112"         
## [113] "V113"          "V114"          "V115"          "V116"         
## [117] "V117"          "V118"          "V119"          "V120"         
## [121] "V121"          "V122"          "V123"          "V124"         
## [125] "V125"          "V126"          "V127"          "V128"         
## [129] "V129"          "V130"          "V131"          "V132"         
## [133] "V133"          "V134"          "V135"          "V136"         
## [137] "V137"          "V138"          "V139"          "V140"         
## [141] "V141"          "V142"          "V143"          "V144"         
## [145] "V145"          "V146"          "V147"          "V148"         
## [149] "V149"          "V150"          "V151"          "V152"         
## [153] "V153"          "V154"          "V155"          "V156"         
## [157] "V157"          "V158"          "V159"          "V160"         
## [161] "V161"          "V162"          "V163"          "V164"         
## [165] "V165"          "V166"          "V167"          "V168"         
## [169] "V169"          "V170"          "V171"          "V172"         
## [173] "V173"          "V174"          "V175"          "V176"         
## [177] "V177"          "V178"          "V179"          "V180"         
## [181] "V181"          "V182"          "V183"          "V184"         
## [185] "V185"          "V186"          "V187"          "V188"         
## [189] "V189"          "V190"          "V191"          "V192"         
## [193] "V193"          "V194"          "V195"          "V196"         
## [197] "V197"          "V198"          "V199"          "V200"         
## [201] "V201"          "V202"          "V203"          "V204"         
## [205] "V205"          "V206"          "V207"          "V208"         
## [209] "V209"          "V210"          "V211"          "V212"         
## [213] "V213"          "V214"          "V215"          "V216"         
## [217] "V217"          "V218"          "V219"          "V220"         
## [221] "V221"          "V222"          "V223"          "V224"         
## [225] "V225"          "V226"          "V227"          "V228"         
## [229] "V229"          "V230"          "V231"          "V232"         
## [233] "V233"          "V234"          "V235"          "V236"         
## [237] "V237"          "V238"          "V239"          "V240"         
## [241] "V241"          "V242"          "V243"          "V244"         
## [245] "V245"          "V246"          "V247"          "V248"         
## [249] "V249"          "V250"          "V251"          "V252"         
## [253] "V253"          "V254"          "V255"          "V256"         
## [257] "V257"          "V258"          "V259"          "V260"         
## [261] "V261"          "V262"          "V263"          "V264"         
## [265] "V265"          "V266"          "V267"          "V268"         
## [269] "V269"          "V270"          "V271"          "V272"         
## [273] "V273"          "V274"          "V275"          "V276"         
## [277] "V277"          "V278"          "V279"          "V280"         
## [281] "V281"          "V282"          "V283"          "V284"         
## [285] "V285"          "V286"          "V287"          "V288"         
## [289] "V289"          "V290"          "V291"          "V292"         
## [293] "V293"          "V294"          "V295"          "V296"         
## [297] "V297"          "V298"          "V299"          "V300"         
## [301] "V301"          "V302"          "V303"          "V304"         
## [305] "V305"          "V306"          "V307"          "V308"         
## [309] "V309"          "V310"          "V311"          "V312"         
## [313] "V313"          "V314"          "V315"          "V316"         
## [317] "V317"          "V318"          "V319"          "V320"         
## [321] "V321"          "V322"          "V323"          "V324"         
## [325] "V325"          "V326"          "V327"          "V328"         
## [329] "V329"          "V330"          "V331"          "V332"         
## [333] "V333"          "V334"          "V335"          "V336"         
## [337] "V337"          "V338"          "V339"          "V340"         
## [341] "V341"          "V342"          "V343"          "V344"         
## [345] "V345"          "V346"          "V347"          "V348"         
## [349] "V349"          "V350"          "V351"          "V352"         
## [353] "V353"          "V354"          "V355"          "V356"         
## [357] "V357"          "V358"          "V359"          "V360"         
## [361] "V361"          "V362"          "V363"          "V364"         
## [365] "V365"          "V366"          "V367"          "V368"         
## [369] "V369"          "V370"          "V371"          "V372"         
## [373] "V373"          "V374"          "V375"          "V376"         
## [377] "V377"          "V378"          "V379"          "V380"         
## [381] "V381"          "V382"          "V383"          "V384"         
## [385] "V385"          "V386"          "V387"          "V388"         
## [389] "V389"          "V390"          "V391"          "V392"         
## [393] "V393"          "V394"          "V395"          "V396"         
## [397] "V397"          "V398"          "V399"          "V400"         
## [401] "V401"          "V402"          "V403"          "V404"         
## [405] "V405"          "V406"          "V407"          "V408"         
## [409] "V409"          "V410"          "V411"          "V412"         
## [413] "V413"          "V414"          "V415"          "V416"         
## [417] "V417"          "V418"          "V419"          "V420"         
## [421] "V421"          "V422"          "V423"          "V424"         
## [425] "V425"          "V426"          "V427"          "V428"         
## [429] "V429"          "V430"          "V431"          "V432"         
## [433] "V433"          "V434"          "V435"          "V436"         
## [437] "V437"          "V438"          "V439"          "V440"         
## [441] "V441"          "V442"          "V443"          "V444"         
## [445] "V445"          "V446"          "V447"          "V448"         
## [449] "V449"          "V450"          "V451"          "V452"         
## [453] "V453"          "V454"          "V455"          "V456"         
## [457] "V457"          "V458"          "V459"          "V460"         
## [461] "V461"          "V462"          "V463"          "V464"         
## [465] "V465"          "V466"          "V467"          "V468"         
## [469] "V469"          "V470"          "V471"          "V472"         
## [473] "V473"          "V474"          "V475"          "V476"         
## [477] "V477"          "V478"          "V479"          "V480"         
## [481] "V481"          "V482"          "V483"          "V484"         
## [485] "V485"          "V486"          "V487"          "V488"         
## [489] "V489"          "V490"          "V491"          "V492"         
## [493] "V493"          "V494"          "V495"          "V496"         
## [497] "V497"          "V498"          "V499"          "V500"         
## [501] "V501"          "V502"          "V503"          "V504"         
## [505] "V505"          "V506"          "V507"          "V508"         
## [509] "V509"          "V510"          "V511"          "V512"         
## [513] "V513"          "V514"          "V515"          "V516"         
## [517] "V517"          "V518"          "V519"          "V520"         
## [521] "V521"          "V522"          "V523"          "V524"         
## [525] "V525"          "V526"          "V527"          "V528"         
## [529] "V529"          "V530"          "V531"          "V532"         
## [533] "V533"          "V534"          "V535"          "V536"         
## [537] "V537"          "V538"          "V539"          "V540"         
## [541] "V541"          "V542"          "V543"          "V544"         
## [545] "V545"          "V546"          "V547"          "V548"         
## [549] "V549"          "V550"          "V551"          "V552"         
## [553] "V553"          "V554"          "V555"          "V556"         
## [557] "V557"          "V558"          "V559"          "V560"         
## [561] "V561"          "V562"          "V563"          "V564"         
## [565] "V565"          "V566"          "V567"          "V568"         
## [569] "V569"          "V570"          "V571"          "V572"         
## [573] "V573"          "V574"          "V575"          "V576"         
## [577] "V577"          "V578"          "V579"          "V580"         
## [581] "V581"          "V582"          "V583"          "V584"         
## [585] "V585"          "V586"          "V587"          "V588"         
## [589] "V589"          "V590"          "V591"          "V592"         
## [593] "V593"          "V594"          "V595"          "V596"         
## [597] "V597"          "V598"          "V599"          "V600"         
## [601] "V601"          "V602"          "V603"          "V604"         
## [605] "V605"          "V606"          "V607"          "V608"         
## [609] "V609"          "V610"          "V611"          "V612"         
## [613] "V613"          "V614"          "V615"          "V616"         
## [617] "V617"          "V618"          "V619"          "V620"         
## [621] "V621"          "V622"          "V623"          "V624"         
## [625] "V625"          "V626"          "V627"          "V628"         
## [629] "V629"          "V630"          "V631"          "V632"         
## [633] "V633"          "V634"          "V635"          "V636"         
## [637] "V637"          "V638"          "V639"          "V640"         
## [641] "V641"          "V642"          "V643"          "V644"         
## [645] "V645"          "V646"          "V647"          "V648"         
## [649] "V649"          "V650"          "V651"          "V652"         
## [653] "V653"          "V654"          "V655"          "V656"         
## [657] "V657"          "V658"          "V659"          "V660"         
## [661] "V661"          "V662"          "V663"          "V664"         
## [665] "V665"          "V666"          "V667"          "V668"         
## [669] "V669"          "V670"          "V671"          "V672"         
## [673] "V673"          "V674"          "V675"          "V676"         
## [677] "V677"          "V678"          "V679"          "V680"         
## [681] "V681"          "V682"          "V683"          "V684"         
## [685] "V685"          "V686"          "V687"          "V688"         
## [689] "V689"          "V690"          "V691"          "V692"         
## [693] "V693"          "V694"          "V695"          "V696"         
## [697] "V697"          "V698"          "V699"          "V700"         
## [701] "V701"          "V702"          "V703"          "V704"         
## [705] "V705"          "V706"          "V707"          "V708"         
## [709] "V709"          "V710"          "V711"          "V712"         
## [713] "V713"          "V714"          "V715"          "V716"         
## [717] "V717"          "V718"          "V719"          "V720"         
## [721] "V721"          "V722"          "V723"          "V724"         
## [725] "V725"          "V726"          "V727"          "V728"         
## [729] "V729"          "V730"          "V731"          "V732"         
## [733] "V733"          "V734"          "V735"          "V736"         
## [737] "V737"          "V738"          "V739"          "V740"         
## [741] "V741"          "V742"          "V743"          "V744"         
## [745] "V745"          "V746"          "V747"          "V748"         
## [749] "V749"          "V750"          "V751"          "V752"         
## [753] "V753"          "V754"          "V755"          "V756"         
## [757] "V757"          "V758"          "V759"          "V760"         
## [761] "V761"          "V762"          "V763"          "V764"         
## [765] "V765"          "V766"          "V767"          "V768"         
## [769] "V769"          "V770"          "V771"          "V772"         
## [773] "V773"          "V774"          "V775"          "V776"         
## [777] "V777"          "V778"          "V779"          "V780"         
## [781] "V781"          "V782"          "V783"          "V784"         
## [785] "V785"          "V786"          "V787"          "V788"         
## [789] "V789"          "V790"          "V791"          "V792"         
## [793] "V793"          "V794"          "V795"          "V796"         
## [797] "V797"          "V798"          "V799"          "V800"         
## [801] "V801"          "V802"          "V803"          "V804"         
## [805] "V805"          "V806"          "V807"          "V808"         
## [809] "V809"          "V810"          "V811"          "V812"         
## [813] "V813"          "V814"          "V815"          "V816"         
## [817] "V817"          "V818"          "V819"          "V820"         
## [821] "V821"          "V822"          "V823"          "V824"         
## [825] "V825"          "V826"          "V827"          "V828"         
## [829] "V829"          "V830"          "V831"          "V832"         
## [833] "V833"          "V834"          "V835"          "V836"         
## [837] "V837"          "V838"          "V839"          "V840"         
## [841] "V841"          "V842"          "V843"          "V844"         
## [845] "V845"          "V846"          "lp__"
# Give the ordered-categorical cut-point (delta) draws readable names,
# then keep only the 15 columns used by runmodel() below
# (intercept + 5 betas + 9 deltas).
names(ordermodsampleswide) = c(names(ordermodsampleswide)[1:6],
                               paste0("deltaeducation", 1:3),
                               paste0("deltadiet", 1:4),
                               paste0("deltaexpertise", 1:2))
ordermodsampleswide = ordermodsampleswide[, 1:15]

# starting dataset: flatten the Stan data list to a data frame and keep
# columns 2-6 (the five covariates: age, gender, education, diet,
# expertise - see their use below).
datalistopwide = data.frame(do.call(cbind, datalistop))[, 2:6]

# design is based around using apply on the parameter set above and different
# matching-length datasets (e.g. fixing all but one for marginal effects):
# the table of parameter draws goes in along with a single row of data,
# and the result for each parameter set comes out.

# Logistic (inverse-logit) transform, guarded so that +Inf maps exactly
# to 1 (adapted from the rethinking package).
inv_logit = function(x) {
  prob = 1 / (1 + exp(-x))
  ifelse(x == Inf, 1, prob)
}

# Evaluate the fitted logistic regression for one posterior draw and one
# respondent row.
#
# parameters: length-15 numeric vector - intercept, 5 slope betas, then
#             the 9 ordered-categorical deltas (education 1-3, diet 1-4,
#             expertise 1-2).
# data:       length-5 numeric vector - age, gender, education, diet,
#             expertise.
#
# Education/diet/expertise enter as ordered categories: level k
# contributes the cumulative sum of the first k deltas, with level 1
# pinned at 0.
runmodel = function(parameters, data) {
  # fixed-effect coefficients
  intercept     = parameters[1]
  betaage       = parameters[2]
  betagender    = parameters[3]
  betaeducation = parameters[4]
  betadiet      = parameters[5]
  betaexpertise = parameters[6]

  # delta vectors, each prepended with the fixed 0 for the first level
  educationdeltas = c(0, parameters[7:9])
  dietdeltas      = c(0, parameters[10:13])
  expertisedeltas = c(0, parameters[14:15])

  # covariates for this respondent
  age       = data[1]
  gender    = data[2]  # binary, 0 = female
  education = data[3]  # integer level 1..4 (see table above)
  diet      = data[4]  # integer level 1..5
  expertise = data[5]  # integer level 1..3

  mu = intercept +
    betaage * age +
    betagender * gender +
    betaeducation * sum(educationdeltas[seq_len(education)]) +
    betadiet * sum(dietdeltas[seq_len(diet)]) +
    betaexpertise * sum(expertisedeltas[seq_len(expertise)])

  inv_logit(mu)  # probability of being a dogmatic
}

# this function runs the previous function for multiple rows of data
# and then gives the posterior mean and nested credible intervals
# (95%, 80%, 50%) for each data row given parameter uncertainty.
#
# parameters: data frame / matrix of posterior draws (one draw per row).
# data:       data frame of covariate rows (age, gender, education,
#             diet, expertise).
# Returns a data frame with one row per data row and columns
# mean, lci1/uci1 (2.5/97.5%), lci2/uci2 (10/90%), lci3/uci3 (25/75%).
runmodelanddata = function(parameters, data) {
  out = data.frame(matrix(NA, ncol = 7, nrow = nrow(data)))
  names(out) = c("mean", "lci1", "lci2", "lci3", "uci3", "uci2", "uci1")
  # seq_len() is safe for zero-row data (1:nrow would give c(1, 0))
  for (i in seq_len(nrow(data))) {
    # one predicted probability per posterior draw, for data row i
    mus = apply(parameters, 1, runmodel, data = as.numeric(data[i, ]))
    # single quantile() call; `probs` spelled out in full (the original
    # relied on partial matching of `p=`)
    qs = quantile(mus, probs = c(0.025, 0.1, 0.25, 0.75, 0.9, 0.975))
    out[i, ] = c(mean(mus), qs)
  }
  out
}

# quick test
# Sanity check: predicted probability of rejection for every observed
# respondent row, eyeballed against each covariate in turn.
testout = runmodelanddata(ordermodsampleswide,datalistopwide)
plot(datalistopwide$age,testout$mean)

plot(datalistopwide$gender,testout$mean)

plot(datalistopwide$education,testout$mean)

plot(datalistopwide$diet,testout$mean)

# now we need to generate dataframes of the marginal effects - have to be a bit careful because 
# I think responses will be non-linearly dependent on the values of the other variables, but median age
# and most common category seems sensible
# Reference values used below (from these summaries): age = 39 (median),
# gender = 1 (modal), education = 2 (modal), diet = 4 (modal),
# expertise = 1 (modal).
median(datalistopwide$age) ; range(datalistopwide$age)
## [1] 39
## [1] 18 88
table(datalistopwide$gender)
## 
##   0   1 
## 123 150
table(datalistopwide$education)
## 
##  1  2  3  4 
## 43 81 73 76
table(datalistopwide$diet)
## 
##   1   2   3   4   5 
##  44  42  14 170   3
table(datalistopwide$expertise)
## 
##   1   2   3 
## 181  59  33
# Age: marginal effect over the observed age range (18-88), holding the
# other covariates at their reference values.
agemarginal = data.frame(age = 18:88,
                         gender = 1,
                         education = 2,
                         diet = 4,
                         expertise = 1)

ageout = runmodelanddata(ordermodsampleswide, agemarginal)

# Nested ribbons: 50% / 80% / 95% credible intervals, darkest innermost.
Fig2c = ggplot(cbind.data.frame(agemarginal, ageout), aes(age, mean)) +
  geom_ribbon(aes(ymin = lci3, ymax = uci3), fill = col_pal, alpha = 0.8) +
  geom_ribbon(aes(ymin = lci2, ymax = uci2), fill = col_pal, alpha = 0.6) +
  geom_ribbon(aes(ymin = lci1, ymax = uci1), fill = col_pal, alpha = 0.4) +
  geom_line(lwd = 2) +
  theme_classic() +
  scale_x_continuous(expand = c(0, 0)) +
  scale_y_continuous(expand = c(0, 0)) +
  theme(axis.text.x = element_text(size = text_sizea),
        axis.title.x = element_text(size = text_sizea),
        axis.text.y = element_text(size = text_sizea),
        axis.title.y = element_text(size = text_sizea),
        plot.title = element_text(size = text_sizea)) +
  labs(x = "Age", y = "Probability of rejection", title = "Age")

# Gender
gendermarginal = data.frame(age=39,
                         gender=c(0,1),
                         education=2,
                         diet=4,
                         expertise=1)

genderout = runmodelanddata(ordermodsampleswide,gendermarginal)

Fig2d = cbind.data.frame(gendermarginal,genderout) %>%
  ggplot(aes(as.factor(gender),mean))+
  geom_linerange(aes(ymin=lci3,ymax=uci3), colour = col_pal, alpha = 0.8, size = 3)+
  geom_linerange(aes(ymin=lci2,ymax=uci2), colour = col_pal, alpha = 0.6, size = 3)+
  geom_linerange(aes(ymin=lci1,ymax=uci1), colour = col_pal, alpha = 0.4, size = 3)+
  geom_point(size=4)  +
  theme_classic()+  
  theme(axis.text.x = element_text(size = text_sizea), axis.title.x = element_text(size = text_sizea),
        axis.text.y = element_text(size = text_sizea), axis.title.y = element_text(size = text_sizea),
        plot.title = element_text(size = text_sizea))+
  labs(x = "", y = "Probability of rejection", title = "Gender")+
  ylab("Probability of rejection")+
  scale_x_discrete(breaks=c("0","1"),labels=c("Female","Male"))

# Education: marginal effect across the four education levels at the
# reference covariate values.
educationmarginal = data.frame(age = 39,
                               gender = 1,
                               education = 1:4,
                               diet = 4,
                               expertise = 1)

educationout = runmodelanddata(ordermodsampleswide, educationmarginal)

Fig2e = ggplot(cbind.data.frame(educationmarginal, educationout),
               aes(as.factor(education), mean)) +
  geom_linerange(aes(ymin = lci3, ymax = uci3), colour = col_pal, alpha = 0.8, size = 3) +
  geom_linerange(aes(ymin = lci2, ymax = uci2), colour = col_pal, alpha = 0.6, size = 3) +
  geom_linerange(aes(ymin = lci1, ymax = uci1), colour = col_pal, alpha = 0.4, size = 3) +
  geom_point(size = 4) +
  theme_classic() +
  theme(axis.text.x = element_text(size = text_sizea),
        axis.title.x = element_text(size = text_sizea),
        axis.text.y = element_text(size = text_sizea),
        axis.title.y = element_text(size = text_sizea),
        plot.title = element_text(size = text_sizea)) +
  labs(x = "", y = "Probability of rejection", title = "Education") +
  # n.dodge staggers the long level labels over two rows
  scale_x_discrete(breaks = c("1", "2", "3", "4"),
                   labels = c("Highschool", "Bachelors", "Masters", "Doctorate"),
                   guide = guide_axis(n.dodge = 2))

# Diet: marginal effect across the five diet categories at the reference
# covariate values.
dietmarginal = data.frame(age = 39,
                          gender = 1,
                          education = 2,
                          diet = 1:5,
                          expertise = 1)

dietout = runmodelanddata(ordermodsampleswide, dietmarginal)

Fig2f = ggplot(cbind.data.frame(dietmarginal, dietout),
               aes(as.factor(diet), mean)) +
  geom_linerange(aes(ymin = lci3, ymax = uci3), colour = col_pal, alpha = 0.8, size = 3) +
  geom_linerange(aes(ymin = lci2, ymax = uci2), colour = col_pal, alpha = 0.6, size = 3) +
  geom_linerange(aes(ymin = lci1, ymax = uci1), colour = col_pal, alpha = 0.4, size = 3) +
  geom_point(size = 4) +
  theme_classic() +
  theme(axis.text.x = element_text(size = text_sizea),
        axis.title.x = element_text(size = text_sizea),
        axis.text.y = element_text(size = text_sizea),
        axis.title.y = element_text(size = text_sizea),
        plot.title = element_text(size = text_sizea)) +
  labs(x = "", y = "Probability of rejection", title = "Diet") +
  # n.dodge staggers the long category labels over two rows
  scale_x_discrete(breaks = c("1", "2", "3", "4", "5"),
                   labels = c("Vegan", "Vegetarian", "Pescatarian", "Omnivore", "Carnivore"),
                   guide = guide_axis(n.dodge = 2))


# Assemble Fig 2: left column = panels A-B (Fig2a/Fig2b are built
# earlier in the script), right column = the four marginal-effect
# panels C-F.
Fig2 = ggarrange(
  ggarrange(Fig2a, Fig2b, heights = c(0.4,1), ncol = 1, nrow = 2, align = "hv", labels = c("A", "B")),
  ggarrange(Fig2c, Fig2d, Fig2e, Fig2f, ncol = 2, nrow = 2, labels = c("C","D","E","F")),
  ncol = 2, widths = c(1,1.5), align = "hv"
)
Fig2

# Save at publication resolution.
ggsave("../Figures/Fig2.png",Fig2,dpi=300,width=10,height=5)

##Model 2: Acceptability

# Split respondents by cluster assignment: cluster 0 is kept for the
# main model (described below as the "pragmatic" group), cluster 1 is
# set aside.
spltqlong = split(qlong, qlong$clus)
include = spltqlong[["0"]]
exclude = spltqlong[["1"]]

# beta correction function
# Rescale a 0-100 acceptability score to the open interval (0, 1) for a
# beta likelihood: exact 0 and 100 are nudged to 0.001 / 0.999 so the
# beta density stays finite; all other values are simply divided by 100.
# Now vectorized (works on whole vectors), while remaining fully
# backward-compatible with the existing sapply(x, betacorrect) call
# sites, which pass scalars.
betacorrect = function(x) {
  p = x / 100
  ifelse(p == 0, 0.001, ifelse(p == 1, 0.999, p))
}

# probably good as supplementary to include everyone - show not arbitrarily splitting the data
# everyone 
# Stan data list for the acceptability model on ALL respondents.
# NOTE(review): here `location` is the raw factor code from hunthunter,
# whereas datalistinclude below recodes it to a 0/1 indicator - confirm
# the two encodings are both intended.
# NOTE(review): `locationcorrector` is passed to Stan but never used in
# the model block - presumably a leftover; confirm.
datalistall = list(
  accep = sapply(qlong$accepscore,betacorrect) ,
  nparticipants = lunique(qlong$Response.ID),
  person = factnum(qlong$Response.ID),
  conservation = qlong$conservpick, 
  localcomm = qlong$localcommpick,
  land = qlong$landpick, 
  weapon = qlong$weaponpick, 
  picture = qlong$afterpick, 
  location = factnum(qlong$hunthunter), 
  locationcorrector = ifelse(factnum(qlong$hunthunter)==1,0,1), 
  charisma = qlong$lgcharismascore,
  n = nrow(qlong)
)

# only those pragmatic
# Same structure as datalistall, restricted to the cluster-0
# ("pragmatic") respondents. `location` is recoded to a binary
# indicator (factor levels 2 or 3 -> 1, level 1 -> 0); presumably
# 1 = hunting outside the hunter's own region - confirm against the
# hunthunter coding.
datalistinclude = list(
  accep = sapply(include$accepscore,betacorrect) ,
  nparticipants = lunique(include$Response.ID),
  person = factnum(include$Response.ID),
  conservation = include$conservpick, 
  localcomm = include$localcommpick,
  land = include$landpick, 
  weapon = include$weaponpick, 
  picture = include$afterpick, 
  location = ifelse(factnum(include$hunthunter) == 2 | factnum(include$hunthunter) == 3, 1, 0), 
  locationcorrector = ifelse(factnum(include$hunthunter)==1,0,1), 
  charisma = include$lgcharismascore,
  n = nrow(include)
)


# remember phi here is precision the reciprocal of the variance 
# Stan program: beta-proportion regression of acceptability on the seven
# hunt attributes, with a respondent-level bivariate random effect on
# (mean intercept id_a, precision id_b) whose covariance gets an LKJ
# prior on the correlation matrix.
# NOTE(review): betacharisma has no explicit prior in the model block
# (every other beta gets normal(0, 1.5)), so it receives Stan's implicit
# flat prior - confirm this omission is intentional.
# NOTE(review): id_b is declared <lower=0> yet modelled jointly with
# id_a via multi_normal, i.e. a zero-truncated normal; the comment in
# transformed parameters suggests a log link was considered instead.
multilevelcovar="
  data {
    int<lower=0> n;
    int<lower=0> nparticipants;
    int person[n];
    
    vector[n] accep;
 
    int conservation[n];
    int localcomm[n];
    int land[n];
    int weapon[n];
    int picture[n];
    int location[n];
    int locationcorrector[n];
    vector[n] charisma;
  }
  
  parameters{
    real betaconservation;
    real betalocal;
    real betaland;
    real betaweapon;
    real betapicture;
    real betalocation;
    real betacharisma;
    
    // hierarchical parameters capturing the covariance of
    // mean and precision of the beta 
    real a_bar;
    real<lower=0> b_bar;
    vector<lower=0>[2] sigma_id;
    
    corr_matrix[2] Rho;
    
    vector[nparticipants] id_a;
    vector<lower=0>[nparticipants] id_b;
  
  }
  
  transformed parameters {
    vector[n] lmu;
    vector<lower=0>[n] phi;
    
    for(i in 1:n){
      lmu[i]  = inv_logit(id_a[person[i]] + betaconservation * conservation[i] + betalocal* localcomm [i] + betaland * land[i] + betaweapon * weapon[i] + betapicture * picture[i] + betalocation * location[i] + betacharisma * charisma[i]  );
      phi[i] =  id_b[person[i]]; // we would put a log link here if we want to model stuff and we'd drop the lower=0 on id_b 
    }
  
  }
  
  model{
  
    betaconservation ~ normal(0,1.5);
    betalocal ~ normal(0,1.5);
    betaland ~ normal(0,1.5);
    betaweapon ~ normal(0,1.5);
    betapicture ~ normal(0,1.5);
    betalocation ~ normal(0,1.5);
  
    
    a_bar ~ normal(0,1.5);
    b_bar ~ exponential(1);
    sigma_id ~ exponential(1);
    
    vector[2] idoff[nparticipants];
    vector[2] MU;
    MU = [a_bar, b_bar]';
    Rho ~ lkj_corr(2);
    
    for(j in 1:nparticipants) {
        idoff[j] = [id_a[j], id_b[j]]';
      }
    idoff ~ multi_normal(MU, quad_form_diag(Rho, sigma_id));
    
    for(i in 1:n){
      target += beta_proportion_lpdf(accep[i]| lmu[i] , phi[i]);
    }
  
  
  }
  
  generated quantities {
  vector[n] log_lik;
  vector[n] scores;
  vector[n] resids;
  for(j in 1:n) {
    log_lik[j] = beta_proportion_lpdf(accep[j]| lmu[j] , phi[j]);
    scores[j] = beta_proportion_rng(lmu[j] , phi[j]);
    resids[j] = accep[j] - beta_proportion_rng(lmu[j] , phi[j]);
  }
  }


"
# run models
# Fit the beta-proportion model to all respondents: 4 chains x 1000
# post-warmup draws each, fixed seed for reproducibility; adapt_delta
# raised to 0.95 (presumably to curb divergent transitions - confirm).
alldatamod=rstan::stan(model_code =multilevelcovar,
               data=datalistall,
               iter = 2000,
               warmup = 1000,
               chains=4,
               cores=4,
               seed = 1234,
               control = list(adapt_delta = 0.95))
## Running /Library/Frameworks/R.framework/Resources/bin/R CMD SHLIB foo.c
## clang -arch arm64 -I"/Library/Frameworks/R.framework/Resources/include" -DNDEBUG   -I"/Library/Frameworks/R.framework/Versions/4.2-arm64/Resources/library/Rcpp/include/"  -I"/Library/Frameworks/R.framework/Versions/4.2-arm64/Resources/library/RcppEigen/include/"  -I"/Library/Frameworks/R.framework/Versions/4.2-arm64/Resources/library/RcppEigen/include/unsupported"  -I"/Library/Frameworks/R.framework/Versions/4.2-arm64/Resources/library/BH/include" -I"/Library/Frameworks/R.framework/Versions/4.2-arm64/Resources/library/StanHeaders/include/src/"  -I"/Library/Frameworks/R.framework/Versions/4.2-arm64/Resources/library/StanHeaders/include/"  -I"/Library/Frameworks/R.framework/Versions/4.2-arm64/Resources/library/RcppParallel/include/"  -I"/Library/Frameworks/R.framework/Versions/4.2-arm64/Resources/library/rstan/include" -DEIGEN_NO_DEBUG  -DBOOST_DISABLE_ASSERTS  -DBOOST_PENDING_INTEGER_LOG2_HPP  -DSTAN_THREADS  -DUSE_STANC3 -DSTRICT_R_HEADERS  -DBOOST_PHOENIX_NO_VARIADIC_EXPRESSION  -D_HAS_AUTO_PTR_ETC=0  -include '/Library/Frameworks/R.framework/Versions/4.2-arm64/Resources/library/StanHeaders/include/stan/math/prim/fun/Eigen.hpp'  -D_REENTRANT -DRCPP_PARALLEL_USE_TBB=1   -I/opt/R/arm64/include   -fPIC  -falign-functions=64 -Wall -g -O2  -c foo.c -o foo.o
## In file included from <built-in>:1:
## In file included from /Library/Frameworks/R.framework/Versions/4.2-arm64/Resources/library/StanHeaders/include/stan/math/prim/fun/Eigen.hpp:22:
## In file included from /Library/Frameworks/R.framework/Versions/4.2-arm64/Resources/library/RcppEigen/include/Eigen/Dense:1:
## In file included from /Library/Frameworks/R.framework/Versions/4.2-arm64/Resources/library/RcppEigen/include/Eigen/Core:88:
## /Library/Frameworks/R.framework/Versions/4.2-arm64/Resources/library/RcppEigen/include/Eigen/src/Core/util/Macros.h:628:1: error: unknown type name 'namespace'
## namespace Eigen {
## ^
## /Library/Frameworks/R.framework/Versions/4.2-arm64/Resources/library/RcppEigen/include/Eigen/src/Core/util/Macros.h:628:16: error: expected ';' after top level declarator
## namespace Eigen {
##                ^
##                ;
## In file included from <built-in>:1:
## In file included from /Library/Frameworks/R.framework/Versions/4.2-arm64/Resources/library/StanHeaders/include/stan/math/prim/fun/Eigen.hpp:22:
## In file included from /Library/Frameworks/R.framework/Versions/4.2-arm64/Resources/library/RcppEigen/include/Eigen/Dense:1:
## /Library/Frameworks/R.framework/Versions/4.2-arm64/Resources/library/RcppEigen/include/Eigen/Core:96:10: fatal error: 'complex' file not found
## #include <complex>
##          ^~~~~~~~~
## 3 errors generated.
## make: *** [foo.o] Error 1
# Visual convergence check for the all-respondents fit.
traceplot(alldatamod)

# Refit the same Stan program to the pragmatist-only subset.
includedatamod=rstan::stan(model_code =multilevelcovar,
                        data=datalistinclude,
                        iter = 2000,
                        warmup = 1000,
                        chains=4,
                        cores=4,
                        seed = 1234,
                        control = list(adapt_delta = 0.95))
traceplot(includedatamod)

##Visualise Model 2

# pp check
# Extract posterior predictive draws ("scores") from both fits.
alldatascores = extract(alldatamod,pars=c("scores"))
includescores = extract(includedatamod,pars=c("scores"))

# Posterior predictive checks on the mean (default stat) and sd of the
# observed acceptability scores, all-respondents model.
(pcheck2=ppc_stat(datalistall$accep,alldatascores$scores))

(pcheck3=ppc_stat(datalistall$accep,alldatascores$scores,stat="sd"))

ggsave("../Figures/Diagnostics/pcheckmod2.png",pcheck2,dpi=300,width=10,height=5)
ggsave("../Figures/Diagnostics/pcheckmod3.png",pcheck3,dpi=300,width=10,height=5)

# Same two checks for the pragmatist-only model.
(pcheck4=ppc_stat(datalistinclude$accep,includescores$scores))

(pcheck5=ppc_stat(datalistinclude$accep,includescores$scores,stat="sd"))

ggsave("../Figures/Diagnostics/pcheckmod4.png",pcheck4,dpi=300,width=10,height=5)
ggsave("../Figures/Diagnostics/pcheckmod5.png",pcheck5,dpi=300,width=10,height=5)


# Tables to save
alldatasum = summary(alldatamod)$summary 
includesum = summary(includedatamod)$summary 

write.table(alldatasum,"../Tables/Model2AllOutput.txt")
write.table(includesum,"../Tables/Model2IncludeOutput.txt")
# flipped for plot
# Keep the first 7 rows: the seven beta coefficients, which are declared
# first in the Stan parameters block.
sumincludedatamod= as.data.frame(summary(includedatamod, probs = c(0.025, 0.1, 0.25, 0.50, 0.75, 0.9,0.975))$summary)[c(1:7),]
# Human-readable labels and a significance flag (hand-assigned) per
# coefficient, in the same order as the Stan declarations.
sumincludedatamod$names = c("Hunt has no impact on species local conservation status", "Hunt revenue is shared with the local community", "Hunt revenue supports wildlife habitat protection" , "Hunter uses a rifle, not a bow", "Hunter poses for a photo with the animal", "Hunter hunts outside of their region i.e. modern colonialism", "Species charisma")
sumincludedatamod$col = c("Sig", "Sig", "Sig", "Sig", "Sig", "Sig", "NSig")

# Shared plot colour.
# NOTE(review): col_pal is also used earlier (Fig2 ribbons/lineranges);
# confirm it is defined before that first use when running top to bottom.
col_pal = "#218f91"

# Forest plots
# Fig 3A: posterior intervals for the species-charisma coefficient,
# drawn on its own because its scale (+/- 0.2) is much narrower than the
# binary attribute coefficients in Fig 3B.
Fig3a = ggplot(data = sumincludedatamod[sumincludedatamod$names == "Species charisma", ]) +
  geom_linerange(aes(xmin = `2.5%`, xmax = `97.5%`, y = names),
                 linewidth = 3, alpha = 0.4, colour = "grey20") +
  geom_linerange(aes(xmin = `10%`, xmax = `90%`, y = names),
                 linewidth = 3, alpha = 0.6, colour = "grey20") +
  geom_linerange(aes(xmin = `25%`, xmax = `75%`, y = names),
                 linewidth = 3, alpha = 0.8, colour = "grey20") +
  geom_point(aes(x = `50%`, y = names), size = 4, colour = "black") +
  geom_vline(aes(xintercept = 0), linetype = "dashed") +
  coord_cartesian(xlim = c(-0.2, 0.2)) +
  theme_classic() +
  labs(x = "", y = "")

# Fig 3B: forest plot of the six binary hunt-attribute coefficients,
# rows ordered by descending posterior median via reorder(). The
# geom_point maps y = names (unordered); on a discrete scale points are
# matched by label, so they land on the correct rows regardless.
Fig3b = ggplot(data = sumincludedatamod[which(sumincludedatamod$names != "Species charisma"),]) +
  geom_linerange(aes(xmin = `2.5%`, xmax = `97.5%`, y = reorder(names, -`50%`), colour = col), linewidth = 3, alpha = 0.4) +
  geom_linerange(aes(xmin = `10%`, xmax = `90%`, y = reorder(names, -`50%`), colour = col), linewidth = 3, alpha = 0.6) +
  geom_linerange(aes(xmin = `25%`, xmax = `75%`, y = reorder(names, -`50%`), colour = col), linewidth = 3, alpha = 0.8) +
  geom_point(aes(x = `50%`, y = names), size = 4, colour = "black") +
  geom_vline(aes(xintercept = 0), linetype = "dashed") +
  coord_cartesian(xlim = c(-1.2, 1.2)) +
  scale_colour_manual(values = c(col_pal), guide = "none") +
  # Direction annotations placed on the bottom row of the plot
  geom_text(aes(x = c(-1), y = c("Hunter hunts outside of their region i.e. modern colonialism"), label = c("Less acceptable"))) +
  geom_text(aes(x = c(1), y = c("Hunter hunts outside of their region i.e. modern colonialism"), label = c("More acceptable"))) +
  theme_classic() +
  labs(x = expression(beta), y = "")

# Stack the two panels and save.
Fig3 = ggarrange(Fig3a, Fig3b, ncol = 1, nrow = 2, heights = c(0.3, 1), align = "hv", labels = c("A", "B"))
Fig3

ggsave("../Figures/Fig3.png",Fig3,dpi=300,width=10,height=5)

# Compare log survey-completion duration between the two clusters,
# dropping extreme durations (>= 10000) as outliers.
# NOTE(review): dqlong and its duration column are created earlier in
# the script; units appear to be seconds - confirm.
t.test(log(duration) ~ clus, data = dqlong[which(dqlong$duration < 10000),])
## 
##  Welch Two Sample t-test
## 
## data:  log(duration) by clus
## t = 5.6264, df = 261.51, p-value = 4.726e-08
## alternative hypothesis: true difference in means between group 0 and group 1 is not equal to 0
## 95 percent confidence interval:
##  0.2731105 0.5671883
## sample estimates:
## mean in group 0 mean in group 1 
##        5.771756        5.351607
# Back-transform the two group means from the log scale.
exp(5.77)
## [1] 320.5377
exp(5.35)
## [1] 210.6083

##Session info

# Record R and package versions for reproducibility.
sessionInfo()
## R version 4.2.3 (2023-03-15)
## Platform: aarch64-apple-darwin20 (64-bit)
## Running under: macOS 15.4.1
## 
## Matrix products: default
## BLAS:   /Library/Frameworks/R.framework/Versions/4.2-arm64/Resources/lib/libRblas.0.dylib
## LAPACK: /Library/Frameworks/R.framework/Versions/4.2-arm64/Resources/lib/libRlapack.dylib
## 
## locale:
## [1] en_US.UTF-8/en_US.UTF-8/en_US.UTF-8/C/en_US.UTF-8/en_US.UTF-8
## 
## attached base packages:
## [1] stats     graphics  grDevices utils     datasets  methods   base     
## 
## other attached packages:
##  [1] bayesplot_1.11.1        gtools_3.9.5            ggpubr_0.6.0           
##  [4] sf_1.0-15               rnaturalearthdata_1.0.0 rnaturalearth_1.0.1    
##  [7] RColorBrewer_1.1-3      rstan_2.32.5            StanHeaders_2.32.5     
## [10] raster_3.6-26           sp_2.1-3                lubridate_1.9.3        
## [13] forcats_1.0.0           stringr_1.5.1           dplyr_1.1.4            
## [16] purrr_1.0.2             readr_2.1.5             tidyr_1.3.1            
## [19] tibble_3.2.1            ggplot2_3.5.0           tidyverse_2.0.0        
## 
## loaded via a namespace (and not attached):
##  [1] matrixStats_1.2.0  httr_1.4.7         tools_4.2.3        backports_1.4.1   
##  [5] bslib_0.6.1        utf8_1.2.4         R6_2.5.1           KernSmooth_2.23-22
##  [9] DBI_1.2.1          colorspace_2.1-0   withr_3.0.1        tidyselect_1.2.0  
## [13] gridExtra_2.3      processx_3.8.3     compiler_4.2.3     textshaping_0.3.7 
## [17] cli_3.6.2          labeling_0.4.3     sass_0.4.8         scales_1.3.0      
## [21] classInt_0.4-10    callr_3.7.3        proxy_0.4-27       QuickJSR_1.1.3    
## [25] systemfonts_1.0.5  digest_0.6.34      rmarkdown_2.25     pkgconfig_2.0.3   
## [29] htmltools_0.5.7    fastmap_1.1.1      highr_0.10         rlang_1.1.3       
## [33] rstudioapi_0.15.0  jquerylib_0.1.4    generics_0.1.3     farver_2.1.1      
## [37] jsonlite_1.8.8     car_3.1-2          inline_0.3.19      magrittr_2.0.3    
## [41] loo_2.8.0          s2_1.1.6           Rcpp_1.0.12        munsell_0.5.0     
## [45] fansi_1.0.6        abind_1.4-5        lifecycle_1.0.4    terra_1.7-71      
## [49] stringi_1.8.3      yaml_2.3.8         carData_3.0-5      plyr_1.8.9        
## [53] pkgbuild_1.4.3     grid_4.2.3         parallel_4.2.3     lattice_0.22-5    
## [57] cowplot_1.1.3      hms_1.1.3          ps_1.8.1           knitr_1.45        
## [61] pillar_1.9.0       ggsignif_0.6.4     reshape2_1.4.4     codetools_0.2-19  
## [65] stats4_4.2.3       wk_0.9.1           glue_1.7.0         evaluate_0.23     
## [69] RcppParallel_5.1.7 vctrs_0.6.5        tzdb_0.4.0         gtable_0.3.4      
## [73] cachem_1.0.8       xfun_0.42          broom_1.0.5        e1071_1.7-14      
## [77] rstatix_0.7.2      ragg_1.2.7         class_7.3-22       units_0.8-5       
## [81] timechange_0.3.0